//
// French → Japanese translation program
//
// Sentence construction (mainly sentence-level processing)
#include "stdafx.h"
#include "f2j.h"
#include "score.h"
#include "myprot.h"
extern BOOL isChanged;
extern TOKEN *lastToken;
extern TOKEN *currentTree;
//
// Sentence with a comparative adjective + <que> + <subject> + <verb>
//
void
CombineMoreThan(TOKEN *start)
{
TOKEN *p;
TOKEN *lastOwner = NULL, *lastThan = NULL;
TOKEN *compWord = NULL;
for(p = start->next; p; p = p->next) {
TOKEN *owner = IsWordOwner(p, FR_ATTRIB_COMPARE);
if(owner
&& !SearchToken(FR_PART_CONJUNCTION_THAN, owner->child)) {
lastOwner = p;
lastThan = NULL;
} else if(IsObjectMatch(p, FR_PART_CONJUNCTION_THAN)) {
lastThan = p;
} else if(ToWaitCombineMoreThan(lastThan, p)) {
lastOwner = lastThan = NULL;
}
}
if(lastOwner == NULL
|| lastThan == NULL
|| lastThan->next == NULL)
return;
compWord = IsWordOwner(lastOwner, FR_ATTRIB_COMPARE);
if(compWord == NULL) return;
if(lastThan->object1 == NULL
&& IsObjectMatch(lastThan->next, (FR_PART)(FR_PART_NOUN_PERSONAL | FR_PART_NOUN_GENERAL | FR_PART_NOUN_DEMONSTRATIVE))) {
TOKEN *q;
for(q = lastThan; q; q = q->next) {
q = SearchToken(FR_PART_VERB_ALL, q);
if(q == NULL) break;
if((q->frPart & FR_PART_COMBINE) && q->object1 == NULL)
continue;
break;
}
if(q == NULL) {
TOKEN *sub = lastThan->next;
DisconnectTOKEN(start, sub);
lastThan->object1 = sub;
}
}
else if(IsObjectMatch(lastThan->next, FR_PART_ADVERB_ALL)) {
TOKEN *adv = lastThan->next;
DisconnectTOKEN(start, adv);
AddChildTOKEN(lastThan, adv);
}
// Marie est aussi intelligent que jolie.
else if(IsObjectMatch(lastThan->next, FR_PART_ADJECTIVE_ALL)
&& lastThan->next->next == NULL) {
TOKEN *adj = lastThan->next;
DisconnectTOKEN(start, adj);
AddChildTOKEN(lastThan, adj);
}
else if(lastThan->object1 == NULL
&& IsObjectMatch(lastThan->next, FR_PART_SENTENCE_ALL)
&& lastThan->next->next == NULL) {
TOKEN *sentence = lastThan->next;
DisconnectTOKEN(start, sentence);
lastThan->object1 = sentence;
}
else
return;
DisconnectTOKEN(start, lastThan);
AddChildTOKEN(compWord, lastThan);
compWord->scoreCombine += SCORE_MORE_THAN;
}
//
//
//
BOOL
ToWaitCombineMoreThan(TOKEN *than, TOKEN *p)
{
// No point in examining it
if(than == NULL)
return(TRUE);
// A sentence immediately after it is allowed through, eg) than S+V
if(than->next != p
&& p->frPart == FR_PART_SENTENCE_NORMAL)
return(TRUE);
return(FALSE);
}
//
// Does the token own a comparative/superlative word?
//
TOKEN
*IsWordOwner(TOKEN *p, FR_ATTRIB frAttrib)
{
TOKEN *q;
TOKEN *ret;
if(p == NULL) return(NULL);
if(p->frAttrib & frAttrib) return(p);
for(q = p->child; q; q = q->next) {
ret = IsWordOwner(q, frAttrib);
if(ret != NULL) return(ret);
}
for(q = p->object1; q; q = q->next) {
ret = IsWordOwner(q, frAttrib);
if(ret != NULL) return(ret);
}
for(q = p->object2; q; q = q->next) {
ret = IsWordOwner(q, frAttrib);
if(ret != NULL) return(ret);
}
return(NULL);
}
//
// Does the token own a word with the specified FR_PART?
//
TOKEN
*IsWordOwner(TOKEN *p, FR_PART frPart)
{
TOKEN *q;
TOKEN *ret;
if(p == NULL) return(NULL);
if(IsObjectMatch(p, frPart)) return(p);
for(q = p->child; q; q = q->next) {
ret = IsWordOwner(q, frPart);
if(ret != NULL) return(ret);
}
for(q = p->object1; q; q = q->next) {
ret = IsWordOwner(q, frPart);
if(ret != NULL) return(ret);
}
for(q = p->object2; q; q = q->next) {
ret = IsWordOwner(q, frPart);
if(ret != NULL) return(ret);
}
return(NULL);
}
//
// Does the token own an ordinal number?
//
TOKEN
*IsNumericOrderOwner(TOKEN *p)
{
TOKEN *q;
TOKEN *ret;
if(p == NULL) return(NULL);
if(p->frPart == FR_PART_NUMETRIC_ORDER) return(p);
for(q = p->child; q; q = q->next) {
ret = IsNumericOrderOwner(q);
if(ret != NULL) return(ret);
}
for(q = p->object1; q; q = q->next) {
ret = IsNumericOrderOwner(q);
if(ret != NULL) return(ret);
}
for(q = p->object2; q; q = q->next) {
ret = IsNumericOrderOwner(q);
if(ret != NULL) return(ret);
}
return(NULL);
}
//
// Sentence with a superlative adjective + <de> + <noun>
//
void
CombineMostAmong(TOKEN *start)
{
TOKEN *p = start;
TOKEN *lastSentence = NULL;
TOKEN *compWord = NULL;
while(p) {
if(p->frPart == FR_PART_SENTENCE_NORMAL)
lastSentence = p;
p = p->next;
}
if(lastSentence == NULL) return;
compWord = IsWordOwner(lastSentence, FR_ATTRIB_TOP);
if(compWord == NULL) {
// il est le deuxi}me du monde. ("the second in the world")
compWord = IsNumericOrderOwner(lastSentence);
}
if(compWord == NULL
// It already has a <de> ("among ...") attached.
|| SearchToken(FR_PART_PREPOSIT_DE, compWord->child) != NULL)
return;
TOKEN *next = lastSentence->next;
if(next
&& IsObjectMatch(next, FR_PART_PREPOSIT_DE)) {
ConvertSpecialWord(next, FR_PART_PREPOSIT_DE);
if(SelectJpPreposition2(compWord, next)) {
DisconnectTOKEN(start, next);
AddChildTOKEN(compWord, next);
compWord->scoreCombine += SCORE_MOST_AMONG;
}
}
}
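//
// Follow object2 of combined tokens of the given part of speech
// and return the right-most token reached.
//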
TOKEN
*GetMostRightToken(TOKEN *p, FR_PART frPart)
{
if((p->frPart & FR_PART_COMBINE)
&& p->object2
&& IsObjectMatch(p->frPart, frPart)) {
return(GetMostRightToken(p->object2, frPart));
}
return(p);
}
//
// Sentence + adverb → sentence
//
void
CombineSentenceAdverb(TOKEN *start)
{
TOKEN *p = start;
TOKEN *lastSentence = NULL;
TOKEN *lastAdverb = NULL;
while(p && p->next) {
if(IsObjectMatch(p->frPart, FR_PART_SENTENCE_ALL)
&& (p->next->frPart & FR_PART_ADVERB)) {
lastSentence = p; lastAdverb = p->next;
}
if((p->frPart & FR_PART_ADVERB)
&& IsObjectMatch(p->next, FR_PART_SENTENCE_ALL)) {
lastAdverb = p; lastSentence = p->next;
}
p = p->next;
}
if(lastSentence == NULL) return;
if(lastSentence->frPart & FR_PART_COMBINE) {
lastSentence = GetMostRightToken(lastSentence, FR_PART_VERB_ALL);
}
if(lastAdverb->next == lastSentence)
// Adverb-then-sentence order
lastAdverb->jpEmphasis |= JP_EMPHASIS_COMMA;
AttachAdverb(start, lastSentence, lastAdverb);
// The adverb and the subject may also have to be swapped, eg) Hier c'{tait dimanche.
ProcessJpHint(lastSentence->subject, lastSentence);
}
CMP_TOKEN pat_SentencePersonal[] = { // je le sais, moi.
CMP_TOKEN( FR_PART_SENTENCE_NORMAL,JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_NOUN_PERSONAL, JP_PROP_NONE, IsNotTokenCombine),
CMP_TOKEN( FR_PART_NONE)
};
CMP_TOKEN pat_PersonalSentence[] = { // moi, je le sais.
CMP_TOKEN( FR_PART_NOUN_PERSONAL, JP_PROP_NONE, IsNotTokenCombine),
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_SENTENCE_NORMAL,JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_NONE)
};
//
// Emphasis using a personal pronoun
// eg) moi, je le sais.
// je le sais, moi.
//
void
CombineSentencePersonal(TOKEN *start)
{
TOKEN *p = start->next;
TOKEN *sentence = NULL, *noun = NULL;
TOKEN *emphasis = NULL;
if(p == NULL) return;
if(IsPatternMatch(pat_SentencePersonal, p)) {
noun = SkipTokens(p, 2);
sentence = p;
} else if(IsPatternMatch(pat_PersonalSentence, p)) {
noun = p;
sentence = SkipTokens(p, 2);
} else
return;
if(emphasis == NULL)
emphasis = SearchToBeEmphasis(sentence, noun);
if(emphasis) {
// Moi aussi, je chante. -> 私<も>歌う ("I sing, <too>")
emphasis->jpEmphasis |= (noun->jpEmphasis | JP_EMPHASIS_GA);
emphasis->scoreCombine += SCORE_SENTENCE_EMPHASIS_NOUN;
DisconnectTOKEN(start, SkipTokens(p, 1)); // remove the comma
DisconnectTOKEN(start, noun);
SelectJpVerb2(sentence->subject, sentence);
}
}
//
// Find the part of the sentence that should be emphasized
//
TOKEN
*SearchToBeEmphasis(TOKEN *p, TOKEN *key)
{
TOKEN *q;
if((p->frPart & FR_PART_NOUN)
&& IsAttribMatch(p, key))
return(p);
if(p->subject) {
for(q = p->subject; q; q = q->next) {
TOKEN *r = SearchToBeEmphasis(q, key);
if(r) return(r);
}
}
if(p->object1) {
for(q = p->object1; q; q = q->next) {
TOKEN *r = SearchToBeEmphasis(q, key);
if(r) return(r);
}
}
if(p->object2) {
for(q = p->object2; q; q = q->next) {
TOKEN *r = SearchToBeEmphasis(q, key);
if(r) return(r);
}
}
if(p->child) {
for(q = p->child; q; q = q->next) {
TOKEN *r = SearchToBeEmphasis(q, key);
if(r) return(r);
}
}
return(NULL);
}
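//
// Is p the last token (or followed only by a verb-combining token)?
//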
BOOL
IsLastToken(TOKEN *p)
{
if(p->next == NULL
|| p->next->frPart == FR_PART_COMBINE_VERB)
return(TRUE);
return(FALSE);
}
// Dans la voiture, il y a un stylo.
CMP_TOKEN pat_PrepositionSentence1[] = {
CMP_TOKEN( FR_PART_PREPOSIT_ALL, JP_PROP_ALL, NULL),
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsLastToken),
CMP_TOKEN( FR_PART_NONE )
};
// Dans la voiture il y a un stylo. (no ",")
CMP_TOKEN pat_PrepositionSentence2[] = {
CMP_TOKEN( FR_PART_PREPOSIT_ALL, JP_PROP_ALL, NULL),
CMP_TOKEN( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsLastToken),
CMP_TOKEN( FR_PART_NONE)
};
// Montez, jusqu'au moment. (drop the ",")
CMP_TOKEN pat_Sentence_Preposition[] = {
CMP_TOKEN( FR_PART_SENTENCE_ALL, JP_PROP_ALL, NULL),
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_PREPOSIT_ALL, JP_PROP_ALL, IsLastToken),
CMP_TOKEN( FR_PART_NONE)
};
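//
// Is p an independent past-participle verb that has no subject yet?
//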
BOOL
IsTokenPPastVerb(TOKEN *p)
{
if( p
&& p->subject == NULL
&& (p->frPart & FR_PART_VERB)
&& p->which
&& (p->frTense & FR_TENSE_P_PAST)
&& (p->frPartChoice & FR_PART_INDEPENDENCE))
return(TRUE);
else
return(FALSE);
}
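//
// Is p an independent present-participle verb?
//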
BOOL
IsTokenPPresentVerb(TOKEN *p)
{
if( p
&& (p->frPart & FR_PART_VERB)
&& p->which
// A present participle may also have a subject
// && p->subject == NULL
&& (p->frTense & FR_TENSE_P_PRESENT)
&& (p->frPartChoice & FR_PART_INDEPENDENCE))
return(TRUE);
else
return(FALSE);
}
//
// Preposition + "," + sentence -> sentence
//
void
CombinePrepositionSentence(TOKEN *start)
{
TOKEN *p;
for(p = start; p; p = p->next) {
if(p->frPart & FR_PART_PREPOSIT) { // IsObjectMatch would match lui (FR_PART_NOUN_OBJECT_I) as a preposition, so test frPart directly
if(IsPatternMatch(pat_PrepositionSentence1, p)) {
TOKEN *comma = p->next, *sentence = p->next->next;
DisconnectTOKEN(start, comma); // remove the ","
CombineSentencePreposition(start, sentence, p);
} else if(IsPatternMatch(pat_PrepositionSentence2, p)) { // no ","
TOKEN *sentence = p->next;
CombineSentencePreposition(start, sentence, p);
}
} else {
if(IsPatternMatch(pat_Sentence_Preposition, p)) {
TOKEN *comma = p->next, *prep = p->next->next;
if(prep->frPart & FR_PART_PREPOSIT) { // IsObjectMatch would match lui (FR_PART_NOUN_OBJECT_I) as a preposition, so test frPart directly
DisconnectTOKEN(start, comma); // remove the ","
CombineSentencePreposition(start, p, prep);
}
}
}
}
}
//
// Participial construction (past participle)
//
void
CombinePPastSentence(TOKEN *start)
{
TOKEN *p = start->next;
if(IsTokenPPastVerb(p) == FALSE)
return;
TOKEN *verb = p;
do {
// Skip prepositions
// Couch{ <sur un canap{>, Jean lisait le journal.
p = p->next;
} while(p && (p->frPart & FR_PART_PREPOSIT));
if(p
&& (( p->frPart == FR_PART_SPECIAL_COMMA
&& p->next
&& IsObjectMatch(p->next, FR_PART_SENTENCE_NORMAL))
|| ( p->frPart == FR_PART_COMBINE_VERB
&& p->object1 == NULL))) {
TOKEN *altP = CopyCurrentTree(verb, "PPastSentence", FR_PART_INDEPENDENCE);
verb->subject = PutNullNoun();
verb->frTense = FR_TENSE_PRESENT; // not passive
verb->jpProp = (verb->jpProp & ~JP_PROP_PASSIVE);
verb->frPart = FR_PART_SENTENCE_PPAST;
isChanged = TRUE;
}
}
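//
// Present-participle verb that is not part of a combine and sits at the end of the token list.
//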
BOOL
IsTokenPPresentVerb_Last(TOKEN *p)
{
if(!(p->frPart & FR_PART_COMBINE)
&& IsTokenPPresentVerb(p)
&& IsLastToken(p))
return(TRUE);
return(FALSE);
}
CMP_TOKEN pat_SentencePPresent[] = {
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL, JP_PROP_NONE, NULL ),
CMP_TOKEN ( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL ),
CMP_TOKEN ( FR_PART_VERB_ALL, JP_PROP_NONE, IsTokenPPresentVerb_Last ),
CMP_TOKEN ( FR_PART_NONE)
};
//
// Participial construction (present participle)
//
void
CombinePPresentSentence(TOKEN *start)
{
TOKEN *p;
for(p = start; p; p = p->next) {
if(IsPatternMatch(pat_SentencePPresent, p)) {
CombineSentencePPresent(start, p, p->next->next);
DisconnectTOKEN(start, p->next); // remove the comma
return;
}
}
p = start->next;
if(IsTokenPPresentVerb(p) == FALSE)
return;
TOKEN *verb = p;
do {
// Skip prepositions
// Couch{ <sur un canap{>, Jean lisait le journal.
p = p->next;
} while(p && (p->frPart & FR_PART_PREPOSIT));
if(p
&& p->frPart == FR_PART_SPECIAL_COMMA
&& p->next
&& IsObjectMatch(p->next, FR_PART_SENTENCE_NORMAL)) {
CombineSentencePPresent(start, p->next, verb);
DisconnectTOKEN(start, p);
} else if(p
&& IsObjectMatch(p, FR_PART_COMBINE_VERB, JP_PROP_NONE, HasNoObjects, ",")) {
CombineSentencePPresent(start, p, verb);
}
}
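//
// Attach the present participle to the sentence as a child,
// sharing the sentence's subject when the participle has none.
//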
void
CombineSentencePPresent(TOKEN *start, TOKEN *sentence, TOKEN *present)
{
// TOKEN *altP = CopyCurrentTree(sentence, "PPresentSentence", FR_PART_INDEPENDENCE);
if(present->subject == NULL) {
present->subject = sentence->subject;
present->synSubject = FALSE;
}
present->frPart = FR_PART_SENTENCE_PPRESENT;
present->jpEmphasis |= JP_EMPHASIS_COMMA;
DisconnectTOKEN(start, present);
AddChildTOKEN(sentence, present);
sentence->scoreCombine += SCORE_SENTENCE_PPRESENT;
}
void
CombineSentencePreposition(TOKEN *start, TOKEN *sentence, TOKEN *preposition)
{
// Attach it to the sentence
if(SelectJpPreposition2(sentence, preposition)) {
DisconnectTOKEN(start, preposition);
AddChildTOKEN(sentence, preposition);
preposition->jpEmphasis |= JP_EMPHASIS_COMMA;
}
}
//
// Participial construction using a past participle
//
void
CombineSentenceParticiple(TOKEN *start, TOKEN *sentence, TOKEN *verb)
{
TOKEN *subject = sentence->object1;
// When the sentence has no subject
if(subject == NULL) subject = sentence;
// First, try attaching it to the sentence
SelectJpVerb2(subject, verb);
if(isChanged) {
DisconnectTOKEN(start, verb);
AddChildTOKEN(sentence, verb);
if(verb->frTense & FR_TENSE_P_PAST)
verb->frPart = FR_PART_SENTENCE_PPAST;
if(verb->frTense & FR_TENSE_P_PRESENT)
verb->frPart = FR_PART_SENTENCE_PPRESENT;
}
}
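//
// Let the sentence inherit the conjunction's part of speech, word data (what/which) and children.
//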
void
SucceedSentences(TOKEN *sent, TOKEN *conj)
{
sent->frPart = conj->frPart;
sent->what = conj->what;
sent->which = conj->which;
if(conj->child)
AddChildTOKEN(sent, conj->child);
}
//
// Sentence of noun + present participle
//
void
ChangeParticipleToSentence(TOKEN *start)
{
TOKEN *p = start;
TOKEN *verb = NULL;
TOKEN *subject = NULL;
while(p) {
if(p->frPart & FR_PART_NOUN) subject = p;
if(p->frPart & FR_PART_VERB) verb = p;
p = p->next;
}
if(CanBeSubject(subject) == FALSE) return;
if(verb != NULL) return;
verb = SearchToken(FR_PART_VERB_GENERAL, subject->child);
if(verb
&& verb->frTense == FR_TENSE_P_PRESENT) {
DisconnectChildToken(subject, verb);
InsertTOKEN(subject, verb);
CombineSubjectVerb(start, subject, verb);
}
}
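//
// Is p the first token of the current tree?
//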
BOOL
IsFirstToken(TOKEN *p)
{
if(currentTree->next == p) return(TRUE);
return(FALSE);
}
BOOL
HaveObject12(TOKEN *p)
{
if(p->object1 && p->object2) return(TRUE);
else return(FALSE);
}
BOOL
HaveObject2(TOKEN *p)
{
if(p->object2) return(TRUE);
else return(FALSE);
}
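//
// Sentence + conjunction (<et> etc.) + sentence -> combined sentence
//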
void
CombineSentenceEt(TOKEN *start)
{
TOKEN *p = start;
// This rule starts combining from the head of the sentence.
for(p = start; p && p->next; p = p->next) {
TOKEN *comma = NULL, *next = p->next;
if(!IsObjectMatch(p, (FR_PART)(FR_PART_SENTENCE_ALL | FR_PART_INDEPENDENCE_ALL))
|| (p->frTense & FR_TENSE_P_PRESENT)
|| (p->frTense & FR_TENSE_P_PAST)) continue;
// (Montez, grimpez), pers{v{rez. -> Montez, (grimpez, pers{v{rez).
if((p->frPart & FR_PART_COMBINE)
&& p->object1 == NULL) continue;
if(next->next
&& next->frPart == FR_PART_SPECIAL_COMMA) {
comma = next;
next = next->next;
}
if((next->frPart & FR_PART_COMBINE)
&& IsObjectMatch(next, FR_PART_SENTENCE_NORMAL)
&& next->object1 == NULL
&& next->object2 != NULL) {
if(comma) DisconnectTOKEN(start, comma);
CombineSentenceEt(start, p, next);
}
}
}
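//
// Fold the left-hand sentence p into the conjunction token <et> as its object1
// and fix up the combined sentence (part of speech, properties, score).
//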
void
CombineSentenceEt(TOKEN *start, TOKEN *p, TOKEN *et)
{
JP_COMBINE *jpCombine = MatchJpCombine12(et, p, et->object2);
if(jpCombine == NULL) return;
et->object1 = p;
DisconnectTOKEN(start, p);
et->frPart = jpCombine->myFrPart;
if(jpCombine->myFrPart == FR_PART_COMBINE_VERB)
et->frPart = (FR_PART)(FR_PART_COMBINE | et->object2->frPart);
et->which = jpCombine;
et->jpProp = et->object1->jpProp | et->object2->jpProp;
et->jpProp &= ~(JP_PROP_QUESTION | JP_PROP_NEGATIVE | JP_PROP_PASSIVE);
et->scoreCombine += SCORE_MAKE_SENTENCE;
FixNoSubjectSentence(et, et->object1, et->object2);
SelectJpCombine12(et, et->object1, et->object2);
if(IsObjectMatch(et->object1, FR_PART_SENTENCE_PPAST)) {
ImproveSubjectVerb(et->object2->subject, et->object1);
}
if(jpCombine->Exec) (jpCombine->Exec)(et);
}
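//
// If the second verb has no subject, let it share the first verb's subject
// (recursing into combined verbs).
//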
void
FixNoSubjectSentence(TOKEN *p, TOKEN *verb1, TOKEN *verb2)
{
if(p == NULL
|| verb1 == NULL
|| verb2 == NULL)
return;
if(verb2->frPart == FR_PART_COMBINE_VERB) {
FixNoSubjectSentence(p, verb1, verb2->object1);
FixNoSubjectSentence(p, verb1, verb2->object2);
}
if(verb1->subject == NULL
|| verb2->subject)
return;
if(IsObjectMatch(verb2, FR_PART_SENTENCE_NO_SUBJECT)) {
// ils s'accordent et s'entendent bien.
// ils se entendent -> so it can be handled as a reflexive pronoun
verb2->subject = verb1->subject;
ImproveSubjectVerb(verb1->subject, verb2);
}
}
//
// Attach a noun modifier that has no preposition to the sentence
// eg) J'ai vu M.Dupont la semaine derni}re.
//
CMP_TOKEN pat_SentenceNounTime1[] = {
CMP_TOKEN ( FR_PART_NOUN_GENERAL, JP_PROP_TIME, HasArticleDefinitOrUnique ),
CMP_TOKEN ( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL ),
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsLastToken ),
CMP_TOKEN ( FR_PART_NONE)
};
CMP_TOKEN pat_SentenceNounTime2[] = {
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, NULL),
CMP_TOKEN ( FR_PART_NOUN_GENERAL, JP_PROP_TIME, HasArticleDefinitOrUnique ),
CMP_TOKEN ( FR_PART_NONE)
};
CMP_TOKEN pat_SentenceNounTime3[] = {
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, NULL),
CMP_TOKEN ( FR_PART_NOUN_ALL, JP_PROP_TIME, NULL, "tout"),
CMP_TOKEN ( FR_PART_NONE)
};
CMP_TOKEN pat_SentenceNounTime4[] = {
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, NULL),
CMP_TOKEN ( FR_PART_NOUN_ALL, JP_PROP_TIME, NULL, "toute"),
CMP_TOKEN ( FR_PART_NONE)
};
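//
// Try each sentence + noun attachment rule in turn until one changes the tree.
//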
void
CombineSentenceNoun(TOKEN *start)
{
CombineSentenceNounPPresent( start );
if(!isChanged) CombineSentenceNounTime( start );
if(!isChanged) CombineSentenceNounCe( start );
if(!isChanged) CombineSentencePersonal( start );
if(!isChanged) CombineSentenceNounMisc( start );
}
//
// Noun + present participle, S+V
//
BOOL
HasPPresentVerb(TOKEN *p)
{
if(!(p->frPartChoice & FR_PART_INDEPENDENCE))
return(FALSE);
TOKEN *verb = SearchToken(FR_PART_SENTENCE_PPRESENT, p->child);
if(verb
&& verb->subject == NULL)
return(TRUE);
return(FALSE);
}
CMP_TOKEN pat_SentenceNounPPresent1[] = {
CMP_TOKEN ( FR_PART_NOUN_GENERAL, JP_PROP_NONE, HasPPresentVerb ),
CMP_TOKEN ( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL ),
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsLastToken ),
CMP_TOKEN ( FR_PART_NONE)
};
CMP_TOKEN pat_SentenceNounPPresent2[] = {
CMP_TOKEN ( FR_PART_NOUN_GENERAL, JP_PROP_NONE, HasPPresentVerb ),
CMP_TOKEN ( FR_PART_COMBINE_VERB, JP_PROP_NONE, HasNoObjects ),
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsLastToken ),
CMP_TOKEN ( FR_PART_NONE)
};
void
CombineSentenceNounPPresent(TOKEN *start)
{
TOKEN *p = start->next;
TOKEN *noun = NULL;
if(IsPatternMatch(pat_SentenceNounPPresent1, p)
|| IsPatternMatch(pat_SentenceNounPPresent2, p)) {
noun = p;
} else
return;
// so that (time, S) + V becomes time, (S+V)
noun->scoreCombine += SCORE_SENTENCE_NOUN;
TOKEN *altP = CopyCurrentTree(noun, "PPresentSentence", FR_PART_INDEPENDENCE);
TOKEN *verb = SearchToken(FR_PART_SENTENCE_PPRESENT, noun->child);
DisconnectChildToken(noun, verb);
InsertTOKEN(noun, verb);
DisconnectTOKEN(start, noun);
verb->subject = noun;
verb->synSubject = TRUE;
SelectJpVerb2(verb->subject, verb);
}
void
CombineSentenceNounTime(TOKEN *start)
{
TOKEN *p = start->next;
TOKEN *sentence = NULL;
TOKEN *noun = NULL;
TOKEN *comma = NULL;
if(IsPatternMatch(pat_SentenceNounTime1, p)) {
noun = p; comma = p->next; sentence = p->next->next;
// so that (time, S) + V becomes time, (S+V)
noun->scoreCombine += SCORE_SENTENCE_NOUN;
} else {
while(p && p->next) {
if(IsPatternMatch(pat_SentenceNounTime2, p)
|| IsPatternMatch(pat_SentenceNounTime3, p)
|| IsPatternMatch(pat_SentenceNounTime4, p)) {
sentence = p; noun = p->next;
}
p = p->next;
}
}
if(sentence == NULL) return;
if(comma) DisconnectTOKEN(start, comma);
DisconnectTOKEN(start, noun); // detach the noun from the top level
AddChildTOKEN(sentence, noun);
noun->jpEmphasis |= JP_EMPHASIS_COMMA;
SelectJpVerb2(sentence->subject, sentence);
}
CMP_TOKEN pat_SentenceCe1[] = {
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsSubjectPersonalCe),
CMP_TOKEN ( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN ( FR_PART_NOUN_GENERAL, JP_PROP_ALL, HasArticleDefinitOrUnique_End ),
CMP_TOKEN ( FR_PART_NONE)
};
CMP_TOKEN pat_SentenceCe2[] = {
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsSubjectPersonalCe),
CMP_TOKEN ( FR_PART_COMBINE_NOUN, JP_PROP_NONE, HasNoObjects),
CMP_TOKEN ( FR_PART_NOUN_GENERAL, JP_PROP_ALL, HasArticleDefinitOrUnique_End ),
CMP_TOKEN ( FR_PART_NONE)
};
CMP_TOKEN pat_CeSentence1[] = {
CMP_TOKEN ( FR_PART_NOUN_GENERAL, JP_PROP_ALL, HasArticleDefinitOrUnique),
CMP_TOKEN ( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsSubjectPersonalCe_End),
CMP_TOKEN ( FR_PART_NONE)
};
CMP_TOKEN pat_CeSentence2[] = {
CMP_TOKEN ( FR_PART_NOUN_GENERAL, JP_PROP_ALL, HasArticleDefinitOrUnique),
CMP_TOKEN ( FR_PART_COMBINE_NOUN, JP_PROP_NONE, HasNoObjects),
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsSubjectPersonalCe_End),
CMP_TOKEN ( FR_PART_NONE)
};
// Ce que S+V, c'est A.
CMP_TOKEN pat_CeSentence3[] = {
CMP_TOKEN ( FR_PART_NOUN_SUBJECT, "ce", HasQue),
CMP_TOKEN ( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, IsSubjectPersonalCe_End),
CMP_TOKEN ( FR_PART_NONE)
};
void
CombineSentenceNounCe(TOKEN *start)
{
TOKEN *p = start->next;
TOKEN *sentence = NULL;
TOKEN *noun = NULL;
if(IsPatternMatch(pat_SentenceCe1, p)
|| IsPatternMatch(pat_SentenceCe2, p)) {
sentence = p; noun = p->next->next;
DisconnectTOKEN(start, p->next); // remove the comma
} else if(IsPatternMatch(pat_CeSentence1, p)
|| IsPatternMatch(pat_CeSentence2, p)
|| IsPatternMatch(pat_CeSentence3, p)) {
noun = p; sentence = p->next->next;
DisconnectTOKEN(start, p->next); // remove the comma
} else
return;
DisconnectTOKEN(start, noun);
sentence->subject = noun;
noun->jpEmphasis |= JP_EMPHASIS_CE;
noun->scoreCombine += SCORE_SENTENCE_EMPHASIS_NOUN;
SelectJpVerb2(sentence->subject, sentence);
}
CMP_TOKEN pat_SentenceChacune[] = {
CMP_TOKEN ( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, NULL),
CMP_TOKEN ( FR_PART_NOUN_ALL, "chacune"),
CMP_TOKEN ( FR_PART_NONE)
};
void
CombineSentenceNounMisc(TOKEN *start)
{
TOKEN *p = start->next;
TOKEN *sentence = NULL;
TOKEN *noun = NULL;
for(p = start; p; p = p->next) {
if(IsPatternMatch(pat_SentenceChacune, p)) {
sentence = p; noun = p->next;
}
}
if(sentence == NULL) return;
DisconnectTOKEN(start, noun);
AddChildTOKEN(sentence, noun);
SelectJpVerb2(sentence->subject, sentence);
}
// Apres avoir parle, il est parti.
CMP_TOKEN pat_PrepositSentence1[] = {
CMP_TOKEN( FR_PART_PREPOSIT_ALL, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, NULL),
CMP_TOKEN( FR_PART_NONE)
};
// je crois qu'@ son arriv{, il sera accueilli.
CMP_TOKEN pat_PrepositSentence2[] = {
CMP_TOKEN( FR_PART_SENTENCE_QUE, JP_PROP_NONE, HasNoObjects),
CMP_TOKEN( FR_PART_PREPOSIT_ALL, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_SENTENCE_NORMAL,JP_PROP_ALL, NULL),
CMP_TOKEN( FR_PART_NONE)
};
void
ChangeSentenceToAdverb(TOKEN *start)
{
TOKEN *p = start->next;
TOKEN *sentence = NULL;
TOKEN *adverb = NULL;
TOKEN *disconnect = NULL;
if(IsPatternMatch(pat_PrepositSentence1, p)) {
adverb = p; sentence = p->next->next;
disconnect = p->next; // Disconnect ","
}
while(p && p->next) {
if(IsPatternMatch(pat_PrepositSentence2, p)) {
adverb = p->next; sentence = SkipTokens(p, 3);
disconnect = SkipTokens(p, 2); // Disconnect ","
}
p = p->next;
}
if(sentence == NULL) return;
if(disconnect) DisconnectTOKEN(start, disconnect);
DisconnectTOKEN(start, adverb);
AddChildTOKEN(sentence, adverb);
}
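//
// Does the subtree under p contain a relative pronoun?
//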
BOOL
HasRelatives(TOKEN *p)
{
if(p) {
if(p->child && HasRelatives(p->child)) return(TRUE);
if(p->object1 && HasRelatives(p->object1)) return(TRUE);
if(p->object2 && HasRelatives(p->object2)) return(TRUE);
if(SearchToken(FR_PART_RELATIVE_ALL, p)) return(TRUE);
}
return(FALSE);
}
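//
// Negated variant used as a pattern predicate (checks children and objects only, not p itself).
//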
BOOL
HasNoRelatives(TOKEN *p)
{
if(p) {
if(p->child && HasRelatives(p->child)) return(FALSE);
if(p->object1 && HasRelatives(p->object1)) return(FALSE);
if(p->object2 && HasRelatives(p->object2)) return(FALSE);
}
return(TRUE);
}
// Il v{cut malheureux.
CMP_TOKEN pat_SentenceAdjective[] = {
CMP_TOKEN( FR_PART_SENTENCE_NORMAL, JP_PROP_ALL, HasNoRelatives), // S1V1 <que> S2V2 ADJ -> the ADJ should attach to S2V2
CMP_TOKEN( FR_PART_ADJECTIVE_GENERAL, JP_PROP_CONDITION | JP_PROP_VOLUME, IsLastToken),
CMP_TOKEN( FR_PART_NONE)
};
void
CombineSentenceAdjective(TOKEN *start)
{
TOKEN *p = start->next;
TOKEN *sentence = NULL;
TOKEN *adjective = NULL;
for(p = start->next; p; p = p->next) {
if(IsPatternMatch(pat_SentenceAdjective, p)
&& !(p->next->frPart & FR_PART_INTERROGATIVE)) {
sentence = p; adjective = p->next;
}
}
if(sentence == NULL) return;
AttachAdjective(start, sentence, adjective, FR_LOCATION_BEHIND);
// il voyage <seul>. seul can be an adverb or an adjective; prefer adverb over adjective
sentence->scoreCombine += SCORE_SENTENCE_ADJECTIVE;
}
//
// Handle independent sentences
// que faire? "what is to be done?"
//
void
ProcessIndependantSentence(TOKEN *start)
{
TOKEN *p;
TOKEN *lastInter = NULL;
for(p = start; p; p = p->next) {
if(p->frPart == FR_PART_INTERROGATIVE_UNCLEAR
&& p->which == NULL
&& p->next
&& (p->next->frPart & FR_PART_VERB)
&& (p->next->frTense & FR_TENSE_ORIGIN))
lastInter = p;
}
if(lastInter == NULL) return;
TOKEN *verb = lastInter->next;
// Que faire? -> Que on faire? After that, handled like an ordinary question
verb->jpProp |= JP_PROP_QUESTION;
// verb->frAttrib = FR_ATTRIB_IL;
if(verb->object2) { // give up object 2
InsertTOKEN(verb, verb->object2);
verb->object2 = NULL;
}
if(verb->object1) { // give up object 1
InsertTOKEN(verb, verb->object1);
verb->object1 = NULL;
}
verb->which = NULL; // start over
TOKEN *subject = PutNullNoun();
subject->frPart = FR_PART_NOUN_SUBJECT;
subject->frAttrib = FR_ATTRIB_IL;
subject->frTense = FR_TENSE_ORIGIN; // marks an infinitive independent sentence
InsertTOKEN(lastInter, subject);
TOKEN *que = PutNullNoun();
ChangeToSpecial(que, "que");
que->frPartChoice = FR_PART_VERB;
InsertTOKEN(lastInter, que);
SwapToken(lastInter, que);
}
// il traduit, dans ses langue, sa solitude.
CMP_TOKEN pat_InsertedPreposition[] = {
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_PREPOSIT_ALL, JP_PROP_ALL, CanVerbParent),
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_NONE)
};
void
ProcessInsertedPreposition(TOKEN *start)
{
TOKEN *p;
TOKEN *lastComma = NULL;
for(p = start; p; p = p->next) {
if(IsPatternMatch(pat_InsertedPreposition, p)) {
lastComma = p;
}
}
if(lastComma == NULL) return;
TOKEN *inserted = SkipTokens(lastComma, 1);
// Do not let it attach to the following sentence -> never cross a conjunction
TOKEN *lastToken = SearchToken(FR_PART_SENTENCE_ALL, inserted);
if(lastToken == NULL)
lastToken = GetLastTOKEN(start);
DisconnectTOKEN(start, SkipTokens(lastComma, 2));
DisconnectTOKEN(start, inserted);
DisconnectTOKEN(start, lastComma);
inserted->punctuation = FR_PUNCT_COMMA;
inserted->frPartParent = FR_PART_VERB;
inserted->frPartChoice = FR_PART_NONE;
InsertTOKEN(lastToken, inserted);
}
//
// Handle parenthetical insertions such as forms of address
//
BOOL
CanBeInterjection(TOKEN *p)
{
if((p->frPartChoice & FR_PART_INDEPENDENCE)
&& SearchToken(FR_PART_ADJECTIVE_EXCLAMINATION, p->child) == NULL)
return(TRUE);
else
return(FALSE);
}
// je vous prie, Monsieur, de agger ...
CMP_TOKEN pat_NounToInetjection[] = {
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_NOUN_GENERAL, JP_PROP_HUMAN, CanBeInterjection),
CMP_TOKEN( FR_PART_SPECIAL_COMMA, JP_PROP_NONE, NULL),
CMP_TOKEN( FR_PART_NONE)
};
void
ChangeNounToInterjection(TOKEN *start)
{
TOKEN *p;
TOKEN *lastComma = NULL;
for(p = start; p; p = p->next) {
if(IsPatternMatch(pat_NounToInetjection, p)) {
lastComma = p;
}
}
if(lastComma == NULL) return;
TOKEN *parent = SearchToken(FR_PART_VERB, start);
if(parent == NULL)
parent = PreviousToken(start, lastComma);
if(parent == NULL)
return;
TOKEN *prev = PreviousToken(start, lastComma);
prev->punctuation = FR_PUNCT_COMMA;
TOKEN *interjection = SkipTokens(lastComma, 1);
DisconnectTOKEN(start, SkipTokens(lastComma, 2));
DisconnectTOKEN(start, interjection);
interjection->frPart = FR_PART_INDEPENDENCE_NOUN;
// An interjection can also sit between two verbs, so keep the ","
// eg) Courte la t]te, <Sicambre>, bru~le ce que tu as ador{.
// DisconnectTOKEN(start, lastComma);
lastComma->frPartChoice = FR_PART_COMBINE_VERB;
AddChildTOKEN(parent, interjection);
}
//
// Add an interjection to a sentence.
//
void
CombineSentenceInterjection(TOKEN *start)
{
if(start->next
&& start->next->next
&& start->next->next->frPart == FR_PART_SPECIAL_COMMA
&& start->next->next->next
&& IsObjectMatch(start->next->next->next, FR_PART_INDEPENDENCE_ALL)) {
TOKEN *interjection = SkipTokens(start, 3);
TOKEN *sentence = SkipTokens(start, 1);
DisconnectTOKEN(start, SkipTokens(start, 2));
DisconnectTOKEN(start, interjection);
AddChildTOKEN(sentence, interjection);
}
}
//
// Sentence using the simple future -> turn it into an imperative
// Tu feras tes devoirs.
// "You will do your duties" -> "Do your duties"
//
void
ChangeSimpleFutureToOrder(TOKEN *start)
{
TOKEN *sentence = start;
if(sentence == NULL) return;
if( sentence->frPart == FR_PART_COMBINE_VERB
&& (sentence->object2)
&& (IsTokenFrench(sentence, ":") || (sentence->object2->frPart & FR_PART_COMBINE))) {
// Vous aurez cong{ : vous irez visiter le mus{e.
// "You will have a day off, so go and visit the museum."
ChangeSimpleFutureToOrder(sentence->object2);
}
if( sentence
&& sentence->next == NULL
&& IsObjectMatch(sentence, FR_PART_SENTENCE_ALL)
&& sentence->subject
&& (sentence->subject->frPart & FR_PART_NOUN)
&& (sentence->subject->frAttrib & FR_ATTRIB_LEVEL2)
&& (sentence->frPart & FR_PART_VERB)
&& (sentence->frTense == FR_TENSE_FUTURE_SIMPLE)
&& !(sentence->jpProp & JP_PROP_QUESTION)) {
sentence->subject->prtControl = PRT_CONTROL_DISABLE;
sentence->frTense = FR_TENSE_ORDER;
}
}
//
// Is the parent of a <de> that was treated as a negative article really negated?
//
void
CheckNegativeArticle(TOKEN *start, BOOL isNegative)
{
TOKEN *p;
// if(IsThereOtherSentence() == FALSE) return;
if(currentTree->prtControl == PRT_CONTROL_DISABLE)
return;
for(p = start; p; p = p->next) {
if(IsObjectMatch(p, FR_PART_ARTICLE_NEGATIVE)
&& isNegative == FALSE) {
currentTree->prtControl = PRT_CONTROL_DISABLE;
}
if(p->jpProp & JP_PROP_NEGATIVE) isNegative = TRUE;
if(p->child) CheckNegativeArticle(p->child, isNegative);
if(p->object1) CheckNegativeArticle(p->object1, isNegative);
if(p->object2) CheckNegativeArticle(p->object2, isNegative);
}
}
//
// Check for prepositions left unused in TOKEN->preposition
//
void
CheckUnusedPreposition(TOKEN *start)
{
TOKEN *p;
if(currentTree->prtControl == PRT_CONTROL_DISABLE)
return;
for(p = start; p; p = p->next) {
if(p->preposition) {
currentTree->prtControl = PRT_CONTROL_DISABLE;
}
if(p->child) CheckUnusedPreposition(p->child);
if(p->object1) CheckUnusedPreposition(p->object1);
if(p->object2) CheckUnusedPreposition(p->object2);
}
}
void
CheckIllegalPreposition(TOKEN *start)
{
TOKEN *p;
for(p = start; p && p->next; p = p->next) {
TOKEN *next = p->next;
// A preposition that could not become the verb's object
if((p->frPart & FR_PART_VERB)
&& p->which
&& (next->frPart & FR_PART_PREPOSIT)
&& (next->frPartChoice == FR_PART_OBJECT)) {
currentTree->prtControl = PRT_CONTROL_DISABLE;
isChanged = TRUE;
break;
}
}
}
void
CheckIllegalCombine(TOKEN *start)
{
TOKEN *p;
for(p = start; p; p = p->next) {
if(IsObjectMatch(p, FR_PART_SENTENCE_ALL) // this connective turned out to be a false move
&& p->next
&& (p->next->frPart & FR_PART_COMBINE)
&& !(p->next->frPart & FR_PART_VERB)
&& (p->next->object1 == NULL || p->next->object2 == NULL)
&& p->next->next
&& IsObjectMatch(p->next->next, FR_PART_SENTENCE_ALL)) {
currentTree->prtControl = PRT_CONTROL_DISABLE;
return;
}
}
}
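//
// A noun left unclaimed in front of an already-completed ","-joined noun combine
// invalidates the parse.
//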
void
CheckIllegalCommaCombine(TOKEN *start)
{
TOKEN *p;
for(p = start; p && p->next; p = p->next) {
TOKEN *next = p->next;
if(!(next->frPart & FR_PART_NOUN))
continue;
if((p->frPart & FR_PART_PREPOSIT)
&& !(p->frPart & FR_PART_COMBINE)
&& p->which == NULL
&& MatchPrepositionObject(p, next)) {
p = next;
continue;
}
if((p->frPart & FR_PART_VERB)
&& p->which == NULL) {
p = next;
continue;
}
TOKEN *combine = next->next;
if( combine
&& !(combine->frPart & FR_PART_COMBINE)
&& IsObjectMatch(combine, FR_PART_COMBINE_NOUN, JP_PROP_NONE, NULL, ",")
&& combine->object1
&& combine->object2) {
currentTree->prtControl = PRT_CONTROL_DISABLE;
isChanged = TRUE;
break;
}
}
}
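//
// Commas inserted mid-clause: drop one that separates a heavily modified
// subject from its verb, or a stray one left after an imperative-style verb.
//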
void
ProcessInsertedComma(TOKEN *start)
{
TOKEN *prev = start;
TOKEN *p = prev->next;
for(; p && p->next; prev = p, p = p->next) {
TOKEN *next = p->next;
if(p->frPart != FR_PART_SPECIAL_COMMA)
continue;
// Tout ce que tu as observ{ en France<,> a bien chang{.
if((prev->frPart & FR_PART_NOUN)
&& CountTokens(prev->child) >= 4
&& (next->frPart & FR_PART_VERB)) {
prev->punctuation = FR_PUNCT_COMMA;
DisconnectTOKEN(start, p);
return;
}
if(IsObjectMatch(prev, FR_PART_VERB_UNCLEAR)
&& prev->punctuation == FR_PUNCT_COMMA) {
// Donnez<, Monsieur>, le stylo @ moi.
DisconnectTOKEN(start, p);
return;
}
}
}
//
// Mark tokens that have no parent
//
void
MarkRootTokens(TOKEN *start)
{
TOKEN *p;
for(p = start; p; p = p->next) {
p->jpEmphasis |= JP_EMPHASIS_ROOT;
}
}
//
// Check for an <aussi> that has nothing to compare against
// eg) D'autres sont <aussi> contents.
// 他の人は<null><くらい>幸せだ -> 他の人<も>幸せだ ("others are as happy as <null>" -> "others are happy, too")
//
void
CheckNoChildAussi(TOKEN *start)
{
TOKEN *p;
TOKEN *lastSentence = NULL;
for(p = start; p; p = p->next) {
if(IsObjectMatch(p, FR_PART_SENTENCE_NORMAL))
lastSentence = p;
}
if(lastSentence == NULL) return;
TOKEN *compWord = IsWordOwner(lastSentence, FR_ATTRIB_AUSSI);
if(compWord == NULL) return;
// It already has a <que> (comparison target) attached.
if(SearchToken(FR_PART_CONJUNCTION_THAN, compWord->child))
return;
if(lastSentence->subject) {
compWord->frAttrib = (FR_ATTRIB)(compWord->frAttrib & ~FR_ATTRIB_AUSSI);
lastSentence->subject->jpEmphasis |= JP_EMPHASIS_MO;
}
}
//
// Move adverbs hanging off an FR_PART_SENTENCE into the FR_PART_VERB's child list
//
void
MoveSentenceAdverbToVerb(TOKEN *start)
{
TOKEN *p, *q;
for(p = start; p; p = p->next) {
if(p->child) MoveSentenceAdverbToVerb(p->child);
if(p->object1) MoveSentenceAdverbToVerb(p->object1);
if(p->object2) MoveSentenceAdverbToVerb(p->object2);
if(IsObjectMatch(p, FR_PART_SENTENCE_NORMAL)) {
for(q = p->child; q; q = q->next) {
if(IsObjectMatch(q, FR_PART_PREPOSIT_ALL)
|| IsObjectMatch(q, FR_PART_ADVERB_ALL)) {
DisconnectChildToken(p, q);
AddChildTOKEN(p->object2, q);
}
}
}
}
}